funzioni utili

##block functions

block_selector = function(id_block){
    # Extract the rows of the global data frame 'dati2' belonging to the
    # acquisition block 'id_block', using the parallel global index 'indici2'.
    dati2[indici2 == id_block, ]
}

freq_selector = function(data, id_freq, polar=F, mode = 1){
    # Select the (amplitude, phase) column pair of frequency 'id_freq':
    # columns 2*id_freq - 1 and 2*id_freq of 'data'.
    # 'polar' and 'mode' are accepted but currently unused (kept so existing
    # call sites remain valid).
    first_col = 2 * id_freq - 1
    data[, c(first_col, first_col + 1)]
}

Mode = function(vet){
    # Most frequent value of 'vet', returned as numeric.
    # Sorting the negated counts puts the most frequent value first.
    freq_desc = sort(-table(vet))
    as.numeric(names(freq_desc)[1])
}

output_selector = function(id_block){
    # Class label of one block: the modal value (via Mode()) of the global
    # 'rank2' labels over the rows assigned to 'id_block' by 'indici2'.
    block_labels = rank2[indici2 == id_block]
    Mode(block_labels)[1]
}


block_plot = function(blocco, id_freq=1, ampl_mode=1, rm_out = F, plot_mode = 1,  main='', lim = c(-1,1), abs_val = FALSE){
    # Plot one frequency of a block as points in the complex plane.
    #
    # blocco:    matrix/data.frame of (amplitude, phase) column pairs
    # id_freq:   which pair to plot (columns 2*id_freq-1 and 2*id_freq)
    # ampl_mode: 1 = divide by max |amplitude|, 2 = z-score, 3 = min-max
    # rm_out:    drop amplitude outliers (|z-score| > 4) before scaling
    # plot_mode: 1 = cartesian (x, y), 2 = (|x|, |y|), 3 = polar (r, t)
    # abs_val:   fold points into the first quadrant before plotting.
    #            Fix: 'abs_val' used to be an undefined free variable, and its
    #            effect was then overwritten by a duplicated x/y computation;
    #            it is now a proper parameter applied once (default FALSE
    #            reproduces the original effective behavior).

    blocco = blocco[,(id_freq*2-1):(id_freq*2)]

    ampl = blocco[,1]
    phas = blocco[,2]

    if (rm_out){
        zs = abs(scale(ampl))
        out_id = which(zs > 4)
        # guard: x[-integer(0)] would drop ALL elements, not none
        if (length(out_id) > 0){
            ampl = ampl[-out_id]
            phas = phas[-out_id]
        }
    }

    if (ampl_mode == 1){
        r = ampl/(max(abs(ampl)))
    }
    if (ampl_mode == 2){
        r = scale(ampl)
    }
    if (ampl_mode == 3){
        r = (ampl - min(ampl))/(max(abs(ampl)) - min(ampl))
    }

    # polar -> cartesian, computed once
    t = phas
    x = r*cos(t)
    y = r*sin(t)

    if (abs_val){
        x = abs(x)
        y = abs(y)
    }

    if (plot_mode==1){
        plot(x,y, main=main, xlim=lim, ylim=lim)
    }
    if (plot_mode==2){
        plot(abs(x), abs(y), main=main, xlim=lim, ylim=lim)
    }
    if (plot_mode==3){
        plot(r, t, main=main, xlim=lim, ylim=lim)
    }

}


block_to_image = function(blocco, breaks = seq(0,1, 0.1), ampl_mode = 1, rm_out = F, abs_val=T){

    # Convert one (amplitude, phase) column pair into a 2-D relative-frequency
    # table: each point is mapped to the (x, y) plane and binned with 'breaks'.
    #
    # blocco:    two-column matrix/data.frame (amplitude, phase)
    # breaks:    bin edges for both axes (default gives a 10x10 image)
    # ampl_mode: 1 = divide by max |amplitude|, 2 = z-score, 3 = min-max
    # rm_out:    drop amplitude outliers (|z-score| > 4) before scaling
    # abs_val:   fold points into the first quadrant so they land in [0, 1]
    # returns:   table of relative frequencies summing to 1

    ampl = blocco[,1]
    phas = blocco[,2]

    if (rm_out){
        zs = abs(scale(ampl))
        out_id = which(zs > 4)
        # Fix: x[-integer(0)] drops ALL elements; previously, when there were
        # no outliers, this branch emptied the data and the table became NaN.
        if (length(out_id) > 0){
            ampl = ampl[-out_id]
            phas = phas[-out_id]
        }
    }

    if (ampl_mode == 1){
        r = ampl/(max(abs(ampl)))  # NOTE(review): assumes max |ampl| != 0
    }
    if (ampl_mode == 2){
        r = scale(ampl)
    }
    if (ampl_mode == 3){
        r = (ampl - min(ampl))/(max(abs(ampl)) - min(ampl))
    }

    # polar -> cartesian
    t = phas
    x = r*cos(t)
    y = r*sin(t)

    if (abs_val){
        x = abs(x)
        y = abs(y)
    }

    cx = cut(x, breaks=breaks, include.lowest=T)
    cy = cut(y, breaks=breaks, include.lowest=T)
    tab = table(cx,cy)
    tab = tab/sum(tab)
    return(tab)
}


block_to_array = function(blocco, nfreq = 56, img_dim = c(10, 10)){
    # Convert a block into an img_dim[1] x img_dim[2] x nfreq array: one
    # block_to_image() relative-frequency histogram per frequency.
    #
    # blocco:  matrix/data.frame whose first 2*nfreq columns are
    #          (amplitude, phase) pairs
    # nfreq:   number of frequency pairs (generalized from the hard-coded 56)
    # img_dim: histogram size; must equal length(breaks) - 1 used by
    #          block_to_image (10x10 for its default breaks)
    arr = array(NA, dim = c(img_dim, nfreq))

    for (i in seq_len(nfreq)){
        id_var = (i*2-1):(i*2)
        arr[,,i] = block_to_image(blocco[, id_var])
    }
    return(arr)
}


block_sampler = function(blocco, output, nsamples=5, sample_length=1000){
    # Draw 'nsamples' bootstrap samples of 'sample_length' rows each from
    # 'blocco' (rows sampled with replacement); every sample is paired with
    # the single label 'output'.
    n_rows = nrow(blocco)
    samples = vector("list", nsamples)

    for (k in seq_len(nsamples)){
        row_ids = sample(1:n_rows, sample_length, replace=T)
        samples[[k]] = blocco[row_ids, ]
    }

    list(input = samples, output = rep(output, nsamples))
}


block_time_sampler = function(blocco, output, nsamples=5, sample_length=1000){
    # Sample 'nsamples' CONTIGUOUS windows of 'sample_length' rows from
    # 'blocco'; all windows share the single label 'output'.
    # Stops with an error when the block is shorter than the window.
    N = nrow(blocco)
    input = list()
    k = sample_length

    if (N >= k){

        for (i in 1:nsamples){
            # Fix: any start in 1..(N-k+1) leaves a full window. The original
            # sample(1:(N-k), 1) broke when N == k (sampled from 1:0, which
            # can yield start = 0) and could never pick the last window.
            start = sample.int(N - k + 1, 1)
            input[[i]] = blocco[start:(start+k-1),]
        }

    }else{
        stop('not enough data')  # fixed typo ('not enougth data')
    }
    output = rep(output, nsamples)

    return(list(input=input,output=output))
}



block_divider = function(blocco, output, nsamples=5){
    # Split 'blocco' into consecutive chunks of (near) equal length.
    # NOTE(review): because of seq()/round(), the actual number of chunks can
    # differ from 'nsamples' and trailing rows may be dropped — confirm this
    # is acceptable for callers.
    N = nrow(blocco)

    ids = seq(1,N, round(N/(nsamples)))

    input = list()

    for (i in 1:(length(ids)-1)){
        start = ids[i]
        end = ids[i+1] - 1
        input[[i]] = blocco[start:end,]
    }

    # Fix: replicate the label once per chunk actually produced; the original
    # rep(output, nsamples) could mismatch length(input) (e.g. N=10,
    # nsamples=6 produces 4 chunks but 6 labels).
    output = rep(output, length(input))

    return(list(input=input,output=output))
}


arraylist2array = function(arraylist){
    # Stack a list of equally-shaped 3-D arrays into a single 4-D array whose
    # first dimension indexes the list elements.
    n = length(arraylist)
    stacked = array(NA, dim = c(n, dim(arraylist[[1]])))

    for (idx in seq_len(n)){
        stacked[idx, , , ] = arraylist[[idx]]
    }
    stacked
}


one_hot_encode <- function(labels, num_classes) {
  # One-hot encode a vector of 0-based integer labels into a
  # length(labels) x num_classes 0/1 matrix.
  encoded <- matrix(0, nrow = length(labels), ncol = num_classes)

  # Vectorized matrix-index assignment: row i gets a 1 in column labels[i]+1.
  encoded[cbind(seq_along(labels), labels + 1)] <- 1

  encoded
}

importa dati e primo preprocessing

# Load the raw training CSV: 115 unnamed columns (header=FALSE).
start = Sys.time()
dati = read.csv("~/Fede/Unimib/ldc/LAB DATA CHALLENGE 2025 (dataset)/nuovi dati/LDC2025_training.csv", header=FALSE)
  #read.csv('/kaggle/input/stmicroelectronics-lab-data-challenge-unimib/LDC2025_training.csv',header=F)

print('tempo impiegato per caricare il dataset')
## [1] "tempo impiegato per caricare il dataset"
Sys.time() - start
## Time difference of 47.88315 secs
start = Sys.time()
# Columns 113:115 are the class indicator columns.
id_col_cat = 113:115
colnames(dati)[id_col_cat] = c('none', 'one', 'move')

# Interleaved signal columns among 1:112.
# NOTE(review): the names are swapped — id_pari ("even") actually holds the
# ODD positions 1,3,...,111 and id_dispari the even ones. The amplitude/phase
# naming below follows the positions, so the result is still consistent.
id_pari = seq(1,112,2)
id_dispari = seq(2,112,2)

colnames(dati)[id_pari] = paste0('ampl', 1:56) #odd positions = amplitude
colnames(dati)[id_dispari] = paste0('phase', 1:56) #even positions = phase


# Basic preprocessing: collapse the three indicator columns into a single
# categorical label ('cat') and a 0/1/2 numeric rank ('rank').
# NOTE(review): this colnames assignment duplicates the id_col_cat one above.
colnames(dati)[113:115] = c('none', 'one', 'move')
cat = ifelse(dati$none==1, 'none', ifelse(dati$one==1, 'one', 'move'))
rank = ifelse(dati$none==1, 0, ifelse(dati$one==1, 1, 2))

print('tempo impiegato per il primo prepro')
## [1] "tempo impiegato per il primo prepro"
Sys.time() - start
## Time difference of 0.3970132 secs
start = Sys.time()

secondo preprocessing

## Delimiter/outlier row ids: row 1 plus every row whose first amplitude is 0.
## These rows are used below as block boundaries and are then discarded.
f1 = dati[,1]
ids = outliers = c(1,which(f1==0))

## Assign each row a block index: rows between consecutive delimiters get the
## same cluster number.
indici = numeric(length(f1))
clusters = 1:(length(ids)-1)
for (i in clusters){
    indici[ids[i]:ids[i+1]] = i
}

## Remove the delimiter rows from the data, the index vector and the labels
## (the three stay row-aligned).
dati2 = dati[-outliers,]
indici2 = indici[-outliers]
rank2 = rank[-outliers]

print('tempo impiegato per il secondo prepro')
## [1] "tempo impiegato per il secondo prepro"
Sys.time() - start
## Time difference of 15.79682 secs
## EDA on cluster block sizes (kept for reference, not executed)
#tab = table(indici2)
#dimclass = cut(tab, breaks = c(1,50, 100, 500, 1000, 10000, Inf))
#tab
#table(dimclass)

terzo e ultimo preprocessing

Comprende train test splitting e data augmentation

## train sampling

start = Sys.time()

# Block-level train split: 40 of the 59 blocks go to training.
# NOTE(review): no set.seed() before sample(), so the split and all the
# window sampling below are not reproducible across runs.
train_id = sample(1:59, 40)
ns = 20

train_input = list()
train_output = c()

print('train build start')
## [1] "train build start"
pb = txtProgressBar(min = 0, max = length(train_id), initial = 0) 
for (i in train_id){
    blocco = block_selector(i)
    myout = output_selector(i)

    # Random window length per block; blocks shorter than the drawn window
    # are skipped entirely (no samples, no labels).
    sl = sample(100:500, 1)
    if (nrow(blocco) > sl){
        blocchi = block_time_sampler(blocco, output_selector(i), 
                                     nsamples = ns, 
                                     sample_length=sl)
    
        # Each sampled window becomes a 10x10x56 histogram stack.
        new_inputs = list()
        for (j in 1:ns){
            new_inputs[[j]] = block_to_array(blocchi$input[[j]])
        }
        
        train_input = c(train_input, new_inputs)
        train_output =c(train_output, rep(myout, ns))
    }
    
    setTxtProgressBar(pb,i)
}
## (progress bar output omitted)
print('training build complete')
## [1] "training build complete"
Sys.time() - start
## Time difference of 29.71841 secs
## test sampling

start = Sys.time()
print('test build start')
## [1] "test build start"
# Held-out blocks: the 19 not chosen for training.
test_id = (1:59)[-train_id]
ns = 20

test_input = list()
test_output = c()

pd = txtProgressBar(min = 0, max = length(test_id), initial = 0) 
for (i in test_id){
    blocco = block_selector(i)
    myout = output_selector(i)

    # Same augmentation as the training loop: random window length,
    # ns contiguous windows per block; too-short blocks are skipped.
    sl = sample(100:500, 1)
    if (nrow(blocco) > sl){
        blocchi = block_time_sampler(blocco, output_selector(i), 
                                     nsamples = ns, 
                                     sample_length=sl)
    
        new_inputs = list()
        for (j in 1:ns){
            new_inputs[[j]] = block_to_array(blocchi$input[[j]])
        }
        
        test_input = c(test_input, new_inputs)
        test_output =c(test_output, rep(myout, ns))
    }
    
    setTxtProgressBar(pd,i)
}
## ============================================================================
print('test build complete')
## [1] "test build complete"
Sys.time()-start
## Time difference of 15.50902 secs
dim(train_input[[1]])
## [1] 10 10 56
length(train_input)
## [1] 560
length(test_input)
## [1] 320
length(train_output)
## [1] 560
length(test_output)
## [1] 320
## last preprocess for CNN

start = Sys.time()
print('last prepro start')
## [1] "last prepro start"
# Stack the lists of 10x10x56 arrays into single 4-D arrays
# (samples x 10 x 10 x 56), the input shape expected by the CNN.

train_x = arraylist2array(train_input)
test_x = arraylist2array(test_input)

# Labels (ranks 0/1/2) to one-hot matrices with 3 columns.

train_y = one_hot_encode(train_output, 3)
test_y = one_hot_encode(test_output, 3)

print('last prepro complete')
## [1] "last prepro complete"
Sys.time() - start
## Time difference of 0.0962851 secs
dim(train_x)
## [1] 560  10  10  56
dim(test_x)
## [1] 320  10  10  56
dim(train_y)
## [1] 560   3
dim(test_y)
## [1] 320   3

Osserva quali blocchi sono in train e quali in test

# Block ids assigned to the training set.
train_id
##  [1] 21 16 54  3 52 19 31 41 40 36 42 47  8 50 24 57  7 53 25 48 17 44 34  9  4
## [26] 32 45 38  2 33 43 51  5 27 20 59 26 56 29 28
# Visual separator between the two id listings.
'___'
## [1] "___"
# Block ids assigned to the test set.
test_id
##  [1]  1  6 10 11 12 13 14 15 18 22 23 30 35 37 39 46 49 55 58

CNN

Architecture

# Model structure: a small CNN — one conv+pool stage, flatten, two dense
# layers with heavy dropout (0.5 and 0.9) and a 3-class softmax head.

library(keras)
## Warning: il pacchetto 'keras' è stato creato con R versione 4.4.2
model_NN <- keras_model_sequential() %>%
  layer_conv_2d(filters = 10, kernel_size = c(2,2), activation = "relu", input_shape = c(10, 10, 56)) %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_flatten() %>%
  layer_dense(units = 56, activation = "relu") %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 56, activation = "relu") %>%
  layer_dropout(rate = 0.9) %>%  # NOTE(review): 0.9 dropout is very aggressive — confirm intentional
  layer_dense(units = 3, activation = "softmax") %>%
compile(
  optimizer = "adam",
  loss = "categorical_crossentropy",
  metrics = c("categorical_accuracy")
)

summary(model_NN)
## Model: "sequential"
## ________________________________________________________________________________
##  Layer (type)                       Output Shape                    Param #     
## ================================================================================
##  conv2d (Conv2D)                    (None, 9, 9, 10)                2250        
##  max_pooling2d (MaxPooling2D)       (None, 4, 4, 10)                0           
##  flatten (Flatten)                  (None, 160)                     0           
##  dense_2 (Dense)                    (None, 56)                      9016        
##  dropout_1 (Dropout)                (None, 56)                      0           
##  dense_1 (Dense)                    (None, 56)                      3192        
##  dropout (Dropout)                  (None, 56)                      0           
##  dense (Dense)                      (None, 3)                       171         
## ================================================================================
## Total params: 14629 (57.14 KB)
## Trainable params: 14629 (57.14 KB)
## Non-trainable params: 0 (0.00 Byte)
## ________________________________________________________________________________

Fit

## final training
start = Sys.time()

# Fit for 100 epochs. NOTE(review): validation_split takes the TAIL of the
# arrays, and train_x was built block-by-block, so validation samples come
# from the last training blocks — confirm this split is representative.
history1 <- model_NN %>% fit(
  x = train_x,
  y = train_y,
  batch_size = 50,  # batch size
  epochs = 100,      # number of epochs
  validation_split = 0.2  # fraction of data held out for validation
)
## Epoch 1/100
## 9/9 - 3s - loss: 1.0809 - categorical_accuracy: 0.4464 - val_loss: 1.0552 - val_categorical_accuracy: 0.6429 - 3s/epoch - 369ms/step
## Epoch 2/100
## 9/9 - 0s - loss: 1.0479 - categorical_accuracy: 0.5290 - val_loss: 1.0047 - val_categorical_accuracy: 0.6429 - 230ms/epoch - 26ms/step
## Epoch 3/100
## 9/9 - 0s - loss: 1.0051 - categorical_accuracy: 0.5625 - val_loss: 0.9584 - val_categorical_accuracy: 0.6429 - 178ms/epoch - 20ms/step
## Epoch 4/100
## 9/9 - 0s - loss: 0.9812 - categorical_accuracy: 0.5692 - val_loss: 0.9147 - val_categorical_accuracy: 0.6429 - 175ms/epoch - 19ms/step
## Epoch 5/100
## 9/9 - 0s - loss: 0.9262 - categorical_accuracy: 0.6027 - val_loss: 0.8766 - val_categorical_accuracy: 0.6429 - 169ms/epoch - 19ms/step
## Epoch 6/100
## 9/9 - 0s - loss: 0.9006 - categorical_accuracy: 0.5960 - val_loss: 0.8392 - val_categorical_accuracy: 0.6429 - 166ms/epoch - 18ms/step
## Epoch 7/100
## 9/9 - 0s - loss: 0.8454 - categorical_accuracy: 0.6339 - val_loss: 0.7741 - val_categorical_accuracy: 0.6429 - 171ms/epoch - 19ms/step
## Epoch 8/100
## 9/9 - 0s - loss: 0.8152 - categorical_accuracy: 0.6317 - val_loss: 0.7051 - val_categorical_accuracy: 0.6429 - 170ms/epoch - 19ms/step
## Epoch 9/100
## 9/9 - 0s - loss: 0.7811 - categorical_accuracy: 0.6384 - val_loss: 0.6565 - val_categorical_accuracy: 0.6429 - 167ms/epoch - 19ms/step
## Epoch 10/100
## 9/9 - 0s - loss: 0.7392 - categorical_accuracy: 0.6607 - val_loss: 0.6015 - val_categorical_accuracy: 0.6429 - 178ms/epoch - 20ms/step
## Epoch 11/100
## 9/9 - 0s - loss: 0.6711 - categorical_accuracy: 0.6786 - val_loss: 0.5700 - val_categorical_accuracy: 0.6429 - 167ms/epoch - 19ms/step
## Epoch 12/100
## 9/9 - 0s - loss: 0.6575 - categorical_accuracy: 0.6808 - val_loss: 0.5468 - val_categorical_accuracy: 0.6429 - 165ms/epoch - 18ms/step
## Epoch 13/100
## 9/9 - 0s - loss: 0.6341 - categorical_accuracy: 0.6897 - val_loss: 0.5378 - val_categorical_accuracy: 0.6429 - 166ms/epoch - 18ms/step
## Epoch 14/100
## 9/9 - 0s - loss: 0.6082 - categorical_accuracy: 0.7054 - val_loss: 0.5282 - val_categorical_accuracy: 0.6429 - 171ms/epoch - 19ms/step
## Epoch 15/100
## 9/9 - 0s - loss: 0.6001 - categorical_accuracy: 0.7299 - val_loss: 0.5478 - val_categorical_accuracy: 0.6429 - 166ms/epoch - 18ms/step
## Epoch 16/100
## 9/9 - 0s - loss: 0.5963 - categorical_accuracy: 0.7388 - val_loss: 0.5256 - val_categorical_accuracy: 0.6429 - 167ms/epoch - 19ms/step
## Epoch 17/100
## 9/9 - 0s - loss: 0.5950 - categorical_accuracy: 0.7344 - val_loss: 0.5335 - val_categorical_accuracy: 0.6429 - 169ms/epoch - 19ms/step
## Epoch 18/100
## 9/9 - 0s - loss: 0.5070 - categorical_accuracy: 0.7701 - val_loss: 0.5605 - val_categorical_accuracy: 0.6429 - 178ms/epoch - 20ms/step
## Epoch 19/100
## 9/9 - 0s - loss: 0.5194 - categorical_accuracy: 0.7656 - val_loss: 0.5441 - val_categorical_accuracy: 0.6429 - 170ms/epoch - 19ms/step
## Epoch 20/100
## 9/9 - 0s - loss: 0.4815 - categorical_accuracy: 0.8013 - val_loss: 0.5884 - val_categorical_accuracy: 0.6429 - 168ms/epoch - 19ms/step
## Epoch 21/100
## 9/9 - 0s - loss: 0.4638 - categorical_accuracy: 0.8147 - val_loss: 0.5798 - val_categorical_accuracy: 0.6429 - 171ms/epoch - 19ms/step
## Epoch 22/100
## 9/9 - 0s - loss: 0.4773 - categorical_accuracy: 0.7924 - val_loss: 0.5384 - val_categorical_accuracy: 0.6429 - 172ms/epoch - 19ms/step
## Epoch 23/100
## 9/9 - 0s - loss: 0.4626 - categorical_accuracy: 0.8170 - val_loss: 0.5740 - val_categorical_accuracy: 0.6429 - 172ms/epoch - 19ms/step
## Epoch 24/100
## 9/9 - 0s - loss: 0.4143 - categorical_accuracy: 0.8326 - val_loss: 0.5918 - val_categorical_accuracy: 0.6429 - 167ms/epoch - 19ms/step
## Epoch 25/100
## 9/9 - 0s - loss: 0.4232 - categorical_accuracy: 0.8371 - val_loss: 0.5773 - val_categorical_accuracy: 0.6429 - 189ms/epoch - 21ms/step
## Epoch 26/100
## 9/9 - 0s - loss: 0.3925 - categorical_accuracy: 0.8192 - val_loss: 0.6020 - val_categorical_accuracy: 0.6429 - 166ms/epoch - 18ms/step
## Epoch 27/100
## 9/9 - 0s - loss: 0.3713 - categorical_accuracy: 0.8281 - val_loss: 0.5647 - val_categorical_accuracy: 0.6429 - 167ms/epoch - 19ms/step
## Epoch 28/100
## 9/9 - 0s - loss: 0.3563 - categorical_accuracy: 0.8415 - val_loss: 0.6031 - val_categorical_accuracy: 0.6429 - 166ms/epoch - 18ms/step
## Epoch 29/100
## 9/9 - 0s - loss: 0.3654 - categorical_accuracy: 0.8415 - val_loss: 0.6375 - val_categorical_accuracy: 0.6429 - 175ms/epoch - 19ms/step
## Epoch 30/100
## 9/9 - 0s - loss: 0.3621 - categorical_accuracy: 0.8460 - val_loss: 0.5384 - val_categorical_accuracy: 0.6429 - 169ms/epoch - 19ms/step
## Epoch 31/100
## 9/9 - 0s - loss: 0.3361 - categorical_accuracy: 0.8348 - val_loss: 0.5779 - val_categorical_accuracy: 0.6429 - 166ms/epoch - 18ms/step
## Epoch 32/100
## 9/9 - 0s - loss: 0.3309 - categorical_accuracy: 0.8348 - val_loss: 0.5789 - val_categorical_accuracy: 0.6429 - 168ms/epoch - 19ms/step
## Epoch 33/100
## 9/9 - 0s - loss: 0.2965 - categorical_accuracy: 0.8728 - val_loss: 0.5573 - val_categorical_accuracy: 0.6429 - 166ms/epoch - 18ms/step
## Epoch 34/100
## 9/9 - 0s - loss: 0.3089 - categorical_accuracy: 0.8527 - val_loss: 0.6036 - val_categorical_accuracy: 0.6429 - 165ms/epoch - 18ms/step
## Epoch 35/100
## 9/9 - 0s - loss: 0.3027 - categorical_accuracy: 0.8549 - val_loss: 0.5939 - val_categorical_accuracy: 0.6429 - 167ms/epoch - 19ms/step
## Epoch 36/100
## 9/9 - 0s - loss: 0.2821 - categorical_accuracy: 0.8594 - val_loss: 0.5949 - val_categorical_accuracy: 0.6429 - 171ms/epoch - 19ms/step
## Epoch 37/100
## 9/9 - 0s - loss: 0.2695 - categorical_accuracy: 0.8594 - val_loss: 0.5782 - val_categorical_accuracy: 0.6429 - 169ms/epoch - 19ms/step
## Epoch 38/100
## 9/9 - 0s - loss: 0.2774 - categorical_accuracy: 0.8549 - val_loss: 0.5502 - val_categorical_accuracy: 0.6429 - 164ms/epoch - 18ms/step
## Epoch 39/100
## 9/9 - 0s - loss: 0.2514 - categorical_accuracy: 0.8683 - val_loss: 0.5804 - val_categorical_accuracy: 0.6429 - 170ms/epoch - 19ms/step
## Epoch 40/100
## 9/9 - 0s - loss: 0.2707 - categorical_accuracy: 0.8616 - val_loss: 0.5568 - val_categorical_accuracy: 0.6429 - 172ms/epoch - 19ms/step
## Epoch 41/100
## 9/9 - 0s - loss: 0.2665 - categorical_accuracy: 0.8638 - val_loss: 0.5577 - val_categorical_accuracy: 0.7054 - 173ms/epoch - 19ms/step
## Epoch 42/100
## 9/9 - 0s - loss: 0.2570 - categorical_accuracy: 0.8728 - val_loss: 0.5642 - val_categorical_accuracy: 0.7054 - 169ms/epoch - 19ms/step
## Epoch 43/100
## 9/9 - 0s - loss: 0.2469 - categorical_accuracy: 0.8705 - val_loss: 0.4749 - val_categorical_accuracy: 0.6875 - 173ms/epoch - 19ms/step
## Epoch 44/100
## 9/9 - 0s - loss: 0.2680 - categorical_accuracy: 0.8817 - val_loss: 0.5391 - val_categorical_accuracy: 0.6786 - 172ms/epoch - 19ms/step
## Epoch 45/100
## 9/9 - 0s - loss: 0.2370 - categorical_accuracy: 0.8661 - val_loss: 0.5641 - val_categorical_accuracy: 0.7321 - 164ms/epoch - 18ms/step
## Epoch 46/100
## 9/9 - 0s - loss: 0.2093 - categorical_accuracy: 0.8817 - val_loss: 0.4995 - val_categorical_accuracy: 0.7589 - 165ms/epoch - 18ms/step
## Epoch 47/100
## 9/9 - 0s - loss: 0.2234 - categorical_accuracy: 0.8839 - val_loss: 0.4966 - val_categorical_accuracy: 0.7589 - 164ms/epoch - 18ms/step
## Epoch 48/100
## 9/9 - 0s - loss: 0.2092 - categorical_accuracy: 0.8884 - val_loss: 0.5538 - val_categorical_accuracy: 0.7857 - 169ms/epoch - 19ms/step
## Epoch 49/100
## 9/9 - 0s - loss: 0.2173 - categorical_accuracy: 0.8929 - val_loss: 0.5357 - val_categorical_accuracy: 0.8125 - 163ms/epoch - 18ms/step
## Epoch 50/100
## 9/9 - 0s - loss: 0.2369 - categorical_accuracy: 0.8750 - val_loss: 0.4753 - val_categorical_accuracy: 0.8304 - 169ms/epoch - 19ms/step
## Epoch 51/100
## 9/9 - 0s - loss: 0.2112 - categorical_accuracy: 0.8817 - val_loss: 0.4571 - val_categorical_accuracy: 0.8482 - 168ms/epoch - 19ms/step
## Epoch 52/100
## 9/9 - 0s - loss: 0.2065 - categorical_accuracy: 0.8906 - val_loss: 0.4769 - val_categorical_accuracy: 0.8482 - 165ms/epoch - 18ms/step
## Epoch 53/100
## 9/9 - 0s - loss: 0.2286 - categorical_accuracy: 0.8973 - val_loss: 0.4642 - val_categorical_accuracy: 0.8571 - 167ms/epoch - 19ms/step
## Epoch 54/100
## 9/9 - 0s - loss: 0.2382 - categorical_accuracy: 0.8705 - val_loss: 0.4305 - val_categorical_accuracy: 0.8750 - 168ms/epoch - 19ms/step
## Epoch 55/100
## 9/9 - 0s - loss: 0.2118 - categorical_accuracy: 0.8884 - val_loss: 0.4429 - val_categorical_accuracy: 0.8571 - 168ms/epoch - 19ms/step
## Epoch 56/100
## 9/9 - 0s - loss: 0.1963 - categorical_accuracy: 0.8906 - val_loss: 0.4970 - val_categorical_accuracy: 0.8304 - 167ms/epoch - 19ms/step
## Epoch 57/100
## 9/9 - 0s - loss: 0.1849 - categorical_accuracy: 0.9018 - val_loss: 0.5007 - val_categorical_accuracy: 0.8393 - 168ms/epoch - 19ms/step
## Epoch 58/100
## 9/9 - 0s - loss: 0.1903 - categorical_accuracy: 0.8973 - val_loss: 0.4417 - val_categorical_accuracy: 0.8482 - 168ms/epoch - 19ms/step
## Epoch 59/100
## 9/9 - 0s - loss: 0.2084 - categorical_accuracy: 0.8839 - val_loss: 0.4342 - val_categorical_accuracy: 0.8661 - 167ms/epoch - 19ms/step
## Epoch 60/100
## 9/9 - 0s - loss: 0.2158 - categorical_accuracy: 0.8817 - val_loss: 0.4165 - val_categorical_accuracy: 0.8661 - 167ms/epoch - 19ms/step
## Epoch 61/100
## 9/9 - 0s - loss: 0.1940 - categorical_accuracy: 0.8817 - val_loss: 0.4094 - val_categorical_accuracy: 0.8661 - 165ms/epoch - 18ms/step
## Epoch 62/100
## 9/9 - 0s - loss: 0.2000 - categorical_accuracy: 0.8929 - val_loss: 0.4150 - val_categorical_accuracy: 0.8750 - 164ms/epoch - 18ms/step
## Epoch 63/100
## 9/9 - 0s - loss: 0.1758 - categorical_accuracy: 0.9085 - val_loss: 0.5046 - val_categorical_accuracy: 0.8661 - 168ms/epoch - 19ms/step
## Epoch 64/100
## 9/9 - 0s - loss: 0.2177 - categorical_accuracy: 0.8661 - val_loss: 0.4151 - val_categorical_accuracy: 0.8839 - 167ms/epoch - 19ms/step
## Epoch 65/100
## 9/9 - 0s - loss: 0.1871 - categorical_accuracy: 0.8839 - val_loss: 0.3716 - val_categorical_accuracy: 0.8839 - 170ms/epoch - 19ms/step
## Epoch 66/100
## 9/9 - 0s - loss: 0.1876 - categorical_accuracy: 0.8929 - val_loss: 0.4620 - val_categorical_accuracy: 0.8571 - 165ms/epoch - 18ms/step
## Epoch 67/100
## 9/9 - 0s - loss: 0.1838 - categorical_accuracy: 0.9018 - val_loss: 0.4768 - val_categorical_accuracy: 0.8482 - 174ms/epoch - 19ms/step
## Epoch 68/100
## 9/9 - 0s - loss: 0.1732 - categorical_accuracy: 0.9129 - val_loss: 0.4221 - val_categorical_accuracy: 0.8750 - 166ms/epoch - 18ms/step
## Epoch 69/100
## 9/9 - 0s - loss: 0.1774 - categorical_accuracy: 0.8929 - val_loss: 0.3297 - val_categorical_accuracy: 0.9196 - 164ms/epoch - 18ms/step
## Epoch 70/100
## 9/9 - 0s - loss: 0.1923 - categorical_accuracy: 0.8772 - val_loss: 0.3486 - val_categorical_accuracy: 0.9107 - 174ms/epoch - 19ms/step
## Epoch 71/100
## 9/9 - 0s - loss: 0.1755 - categorical_accuracy: 0.9085 - val_loss: 0.4214 - val_categorical_accuracy: 0.8750 - 168ms/epoch - 19ms/step
## Epoch 72/100
## 9/9 - 0s - loss: 0.1600 - categorical_accuracy: 0.9107 - val_loss: 0.4573 - val_categorical_accuracy: 0.8750 - 168ms/epoch - 19ms/step
## Epoch 73/100
## 9/9 - 0s - loss: 0.1637 - categorical_accuracy: 0.9085 - val_loss: 0.3847 - val_categorical_accuracy: 0.8839 - 167ms/epoch - 19ms/step
## Epoch 74/100
## 9/9 - 0s - loss: 0.1775 - categorical_accuracy: 0.8929 - val_loss: 0.3812 - val_categorical_accuracy: 0.8839 - 168ms/epoch - 19ms/step
## Epoch 75/100
## 9/9 - 0s - loss: 0.1388 - categorical_accuracy: 0.9241 - val_loss: 0.4195 - val_categorical_accuracy: 0.8750 - 170ms/epoch - 19ms/step
## Epoch 76/100
## 9/9 - 0s - loss: 0.1382 - categorical_accuracy: 0.9286 - val_loss: 0.4315 - val_categorical_accuracy: 0.8661 - 198ms/epoch - 22ms/step
## Epoch 77/100
## 9/9 - 0s - loss: 0.1692 - categorical_accuracy: 0.9129 - val_loss: 0.3903 - val_categorical_accuracy: 0.8750 - 168ms/epoch - 19ms/step
## Epoch 78/100
## 9/9 - 0s - loss: 0.1447 - categorical_accuracy: 0.9107 - val_loss: 0.3061 - val_categorical_accuracy: 0.9196 - 166ms/epoch - 18ms/step
## Epoch 79/100
## 9/9 - 0s - loss: 0.1687 - categorical_accuracy: 0.9152 - val_loss: 0.2503 - val_categorical_accuracy: 0.9286 - 172ms/epoch - 19ms/step
## Epoch 80/100
## 9/9 - 0s - loss: 0.1390 - categorical_accuracy: 0.9263 - val_loss: 0.3547 - val_categorical_accuracy: 0.8929 - 165ms/epoch - 18ms/step
## Epoch 81/100
## 9/9 - 0s - loss: 0.1722 - categorical_accuracy: 0.8951 - val_loss: 0.3521 - val_categorical_accuracy: 0.8929 - 165ms/epoch - 18ms/step
## Epoch 82/100
## 9/9 - 0s - loss: 0.1509 - categorical_accuracy: 0.9174 - val_loss: 0.2952 - val_categorical_accuracy: 0.9286 - 166ms/epoch - 18ms/step
## Epoch 83/100
## 9/9 - 0s - loss: 0.1431 - categorical_accuracy: 0.9196 - val_loss: 0.3157 - val_categorical_accuracy: 0.9018 - 166ms/epoch - 18ms/step
## Epoch 84/100
## 9/9 - 0s - loss: 0.1366 - categorical_accuracy: 0.9330 - val_loss: 0.3342 - val_categorical_accuracy: 0.8929 - 166ms/epoch - 18ms/step
## Epoch 85/100
## 9/9 - 0s - loss: 0.1642 - categorical_accuracy: 0.9107 - val_loss: 0.3508 - val_categorical_accuracy: 0.8929 - 166ms/epoch - 18ms/step
## Epoch 86/100
## 9/9 - 0s - loss: 0.1541 - categorical_accuracy: 0.9085 - val_loss: 0.3610 - val_categorical_accuracy: 0.8929 - 167ms/epoch - 19ms/step
## Epoch 87/100
## 9/9 - 0s - loss: 0.1634 - categorical_accuracy: 0.9085 - val_loss: 0.3938 - val_categorical_accuracy: 0.8839 - 163ms/epoch - 18ms/step
## Epoch 88/100
## 9/9 - 0s - loss: 0.1353 - categorical_accuracy: 0.9263 - val_loss: 0.3488 - val_categorical_accuracy: 0.8929 - 164ms/epoch - 18ms/step
## Epoch 89/100
## 9/9 - 0s - loss: 0.1439 - categorical_accuracy: 0.9040 - val_loss: 0.4086 - val_categorical_accuracy: 0.8750 - 168ms/epoch - 19ms/step
## Epoch 90/100
## 9/9 - 0s - loss: 0.1752 - categorical_accuracy: 0.8862 - val_loss: 0.3806 - val_categorical_accuracy: 0.8839 - 169ms/epoch - 19ms/step
## Epoch 91/100
## 9/9 - 0s - loss: 0.1208 - categorical_accuracy: 0.9509 - val_loss: 0.3470 - val_categorical_accuracy: 0.8929 - 166ms/epoch - 18ms/step
## Epoch 92/100
## 9/9 - 0s - loss: 0.1898 - categorical_accuracy: 0.8884 - val_loss: 0.3530 - val_categorical_accuracy: 0.8929 - 162ms/epoch - 18ms/step
## Epoch 93/100
## 9/9 - 0s - loss: 0.1298 - categorical_accuracy: 0.9263 - val_loss: 0.3061 - val_categorical_accuracy: 0.9018 - 162ms/epoch - 18ms/step
## Epoch 94/100
## 9/9 - 0s - loss: 0.1307 - categorical_accuracy: 0.9174 - val_loss: 0.3416 - val_categorical_accuracy: 0.8929 - 165ms/epoch - 18ms/step
## Epoch 95/100
## 9/9 - 0s - loss: 0.1403 - categorical_accuracy: 0.9219 - val_loss: 0.3742 - val_categorical_accuracy: 0.8929 - 167ms/epoch - 19ms/step
## Epoch 96/100
## 9/9 - 0s - loss: 0.1712 - categorical_accuracy: 0.8906 - val_loss: 0.3933 - val_categorical_accuracy: 0.8839 - 170ms/epoch - 19ms/step
## Epoch 97/100
## 9/9 - 0s - loss: 0.1585 - categorical_accuracy: 0.9085 - val_loss: 0.3775 - val_categorical_accuracy: 0.8929 - 165ms/epoch - 18ms/step
## Epoch 98/100
## 9/9 - 0s - loss: 0.1303 - categorical_accuracy: 0.9107 - val_loss: 0.3218 - val_categorical_accuracy: 0.9018 - 164ms/epoch - 18ms/step
## Epoch 99/100
## 9/9 - 0s - loss: 0.1423 - categorical_accuracy: 0.9107 - val_loss: 0.3283 - val_categorical_accuracy: 0.9018 - 162ms/epoch - 18ms/step
## Epoch 100/100
## 9/9 - 0s - loss: 0.1655 - categorical_accuracy: 0.9107 - val_loss: 0.3433 - val_categorical_accuracy: 0.8929 - 165ms/epoch - 18ms/step
Sys.time() - start
## Time difference of 20.94411 secs
# Training curves (loss and accuracy per epoch).
plot(history1)

Test

## final evaluation

'train accuracy'
## [1] "train accuracy"
model_NN %>% evaluate(train_x, train_y)
## 18/18 - 0s - loss: 0.0806 - categorical_accuracy: 0.9786 - 122ms/epoch - 7ms/step
##                 loss categorical_accuracy 
##           0.08063378           0.97857141
'test accuracy'
## [1] "test accuracy"
# NOTE(review): large gap between train (0.98) and test (0.56) accuracy —
# the model overfits the training blocks.
model_NN %>% evaluate(test_x, test_y)
## 10/10 - 0s - loss: 2.3178 - categorical_accuracy: 0.5562 - 74ms/epoch - 7ms/step
##                 loss categorical_accuracy 
##             2.317789             0.556250
# Confusion matrix on the test set. Predicted class = argmax column - 1,
# matching the 0-based rank labels.
preds = predict(model_NN, test_x)
## 10/10 - 0s - 286ms/epoch - 29ms/step
preds = apply(preds, 1, which.max) - 1
tab = table(preds, test_output)
tab
##      test_output
## preds   0   1   2
##     0 146  31  33
##     1  33  10  25
##     2   1  19  22
round(tab/sum(tab), 4)*100
##      test_output
## preds     0     1     2
##     0 45.62  9.69 10.31
##     1 10.31  3.12  7.81
##     2  0.31  5.94  6.88
# Same confusion matrix on the training set.
preds = predict(model_NN,train_x)
## 18/18 - 0s - 291ms/epoch - 16ms/step
preds = apply(preds, 1, which.max) - 1
tab = table(preds, train_output)
tab
##      train_output
## preds   0   1   2
##     0 360   0   7
##     1   0 120   5
##     2   0   0  68
round(tab/sum(tab), 4)*100
##      train_output
## preds     0     1     2
##     0 64.29  0.00  1.25
##     1  0.00 21.43  0.89
##     2  0.00  0.00 12.14